%matplotlib inline
import warnings
warnings.filterwarnings("ignore")
import os
import re
# Tutorial about Python regular expressions: https://pymotw.com/2/re/
import string
import pickle
import sqlite3
from collections import Counter

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from tqdm import tqdm

import nltk
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from nltk.stem.wordnet import WordNetLemmatizer

from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer, TfidfVectorizer
from sklearn.model_selection import train_test_split
from sklearn import metrics
from sklearn.metrics import confusion_matrix, roc_curve, auc

import plotly
import plotly.offline as offline
import plotly.graph_objs as go
offline.init_notebook_mode()
project_data = pd.read_csv('train_data.csv')
resource_data = pd.read_csv('resources.csv')
project_data.isnull().sum()
# fill the three null teacher_prefix values with "Teacher"
project_data["teacher_prefix"].fillna("Teacher", inplace=True)
project_data.isnull().sum()
# merge the four essay columns into a single text column; essays 3 and 4 are blank for most
# projects, so fill NaN with '' instead of letting map(str) turn them into the literal "nan"
project_data["essay"] = (project_data["project_essay_1"].fillna('') + " " +
                         project_data["project_essay_2"].fillna('') + " " +
                         project_data["project_essay_3"].fillna('') + " " +
                         project_data["project_essay_4"].fillna(''))
price_data = resource_data.groupby('id').agg({'price':'sum', 'quantity':'sum'}).reset_index()
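A quick look at the aggregated per-project resource cost (optional sanity check):
print(price_data.head())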
project_data = pd.merge(project_data, price_data, on='id', how='left')
project_data.info()
# split the data: 70% train, 30% test
y = project_data["project_is_approved"]
X = project_data.drop("project_is_approved",axis = 1)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=42)
print(X_train.shape," ",y_train.shape)
print(X_test.shape," ",y_test.shape)
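The target is imbalanced, so it is worth checking the class proportions in both splits (optional sanity check):
print(y_train.value_counts(normalize=True))
print(y_test.value_counts(normalize=True))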
# using preprocessing code from the assignment
# clean project subject categories (train data)
categories = list(X_train['project_subject_categories'].values)
cat_list = []
for i in categories:
    temp = ""
    # each entry can hold several categories separated by commas
    for j in i.split(','):
        if 'The' in j.split():    # drop the word 'The', e.g. "The Arts" -> "Arts"
            j = j.replace('The', '')
        j = j.replace(' ', '')    # remove spaces inside a category name
        temp += j.strip() + " "
    temp = temp.replace('&', '_') # "Math & Science" -> "Math_Science"
    cat_list.append(temp.strip())
X_train['clean_categories'] = cat_list
X_train.drop(['project_subject_categories'], axis=1, inplace=True)
X_train['clean_categories'] = cat_list
X_train.drop(['project_subject_categories'], axis=1, inplace=True)
my_counter = Counter()
for word in X_train['clean_categories'].values:
my_counter.update(word.split())
cat_dict = dict(my_counter)
sorted_cat_dict = dict(sorted(cat_dict.items(), key=lambda kv: kv[1]))
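The counter makes it easy to peek at the most frequent categories; sorted_cat_dict is sorted ascending by count, so the last entries are the most common:
print(list(sorted_cat_dict.items())[-5:])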
# clean project subject categories (test data)
categories = list(X_test['project_subject_categories'].values)
cat_list = []
for i in categories:
    temp = ""
    for j in i.split(','):
        if 'The' in j.split():
            j = j.replace('The', '')
        j = j.replace(' ', '')
        temp += j.strip() + " "
    temp = temp.replace('&', '_')
    cat_list.append(temp.strip())
X_test['clean_categories'] = cat_list
X_test.drop(['project_subject_categories'], axis=1, inplace=True)
# clean project subject subcategories (train data)
sub_categories = list(X_train['project_subject_subcategories'].values)
sub_cat_list = []
for i in sub_categories:
    temp = ""
    for j in i.split(','):
        # j.split() splits on spaces: "Math & Science" => "Math", "&", "Science"
        if 'The' in j.split():
            j = j.replace('The', '')  # remove the word 'The' if present
        j = j.replace(' ', '')
        temp += j.strip() + " "
    temp = temp.replace('&', '_')
    sub_cat_list.append(temp.strip())
X_train['clean_subcategories'] = sub_cat_list
X_train.drop(['project_subject_subcategories'], axis=1, inplace=True)
# count of all the words in corpus python: https://stackoverflow.com/a/22898595/4084039
my_counter = Counter()
for word in X_train['clean_subcategories'].values:
my_counter.update(word.split())
sub_cat_dict = dict(my_counter)
sorted_sub_cat_dict = dict(sorted(sub_cat_dict.items(), key=lambda kv: kv[1]))
# clean project subject subcategories (test data)
sub_categories = list(X_test['project_subject_subcategories'].values)
sub_cat_list = []
for i in sub_categories:
    temp = ""
    for j in i.split(','):
        if 'The' in j.split():
            j = j.replace('The', '')
        j = j.replace(' ', '')
        temp += j.strip() + " "
    temp = temp.replace('&', '_')
    sub_cat_list.append(temp.strip())
X_test['clean_subcategories'] = sub_cat_list
X_test.drop(['project_subject_subcategories'], axis=1, inplace=True)
# preprocessing teacher prefix (train data): drop the trailing '.' (e.g. "Mrs." -> "Mrs")
prefix = list(X_train['teacher_prefix'].values)
prefix_list = []
for i in prefix:
    temp = ""
    if "." in i:
        i = i.replace('.', '')
    temp += i.strip() + " "
    prefix_list.append(temp.strip())
X_train['clean_prefix'] = prefix_list
my_counter = Counter()
for word in X_train['clean_prefix'].values:
my_counter.update(word.split())
prefix_dict = dict(my_counter)
sorted_prefix_dict = dict(sorted(prefix_dict.items(), key=lambda kv: kv[1]))
print(sorted_prefix_dict)
# preprocessing teacher prefix (test data)
prefix = list(X_test['teacher_prefix'].values)
prefix_list = []
for i in prefix:
    temp = ""
    if "." in i:
        i = i.replace('.', '')
    temp += i.strip() + " "
    prefix_list.append(temp.strip())
X_test['clean_prefix'] = prefix_list
# preprocessing of grade category for train data: map grade ranges to word tokens
grade = list(X_train['project_grade_category'].values)
grade_list = []
for i in grade:
    temp = ""
    if "Grades" in i:
        i = i.replace("Grades", "")
    if "6-8" in i:
        i = i.replace("6-8", "six_eight")
    if "3-5" in i:
        i = i.replace("3-5", "three_five")
    if "9-12" in i:
        i = i.replace("9-12", "nine_twelve")
    if "PreK-2" in i:
        i = i.replace("PreK-2", "prek_two")
    temp += i.strip() + " "
    grade_list.append(temp.strip())
X_train['clean_grade'] = grade_list
my_counter = Counter()
for word in X_train['clean_grade'].values:
my_counter.update(word.split())
grade_dict = dict(my_counter)
sorted_grade_dict = dict(sorted(grade_dict.items(), key=lambda kv: kv[1]))
print(sorted_grade_dict)
# preprocessing of grade category for test data
grade = list(X_test['project_grade_category'].values)
grade_list = []
for i in grade:
    temp = ""
    if "Grades" in i:
        i = i.replace("Grades", "")
    if "6-8" in i:
        i = i.replace("6-8", "six_eight")
    if "3-5" in i:
        i = i.replace("3-5", "three_five")
    if "9-12" in i:
        i = i.replace("9-12", "nine_twelve")
    if "PreK-2" in i:
        i = i.replace("PreK-2", "prek_two")
    temp += i.strip() + " "
    grade_list.append(temp.strip())
X_test['clean_grade'] = grade_list
# school_state needs no extra preprocessing
state = X_train["school_state"].value_counts()
sorted_state = dict(state)
sorted_state_dict = dict(sorted(sorted_state.items(), key=lambda kv: kv[1]))
X_train["clean_state"] = X_train["school_state"]
#similarly for X_test
X_test["clean_state"] = X_test["school_state"]
from sklearn.preprocessing import StandardScaler

# price: fit the scaler on the train data only, then transform both train and test
price_scaler = StandardScaler()
price_scaler.fit(X_train['price'].values.reshape(-1, 1))
print(f"Mean : {price_scaler.mean_[0]}, Standard deviation : {np.sqrt(price_scaler.var_[0])}")
price_standardized = price_scaler.transform(X_train['price'].values.reshape(-1, 1))
test_price_standardized = price_scaler.transform(X_test['price'].values.reshape(-1, 1))

# quantity standardization, same pattern
quantity_scaler = StandardScaler()
quantity_scaler.fit(X_train["quantity"].values.reshape(-1, 1))
print(f"Mean of Quantity : {quantity_scaler.mean_[0]}, Standard deviation of Quantity : {np.sqrt(quantity_scaler.var_[0])}")
quantity_standardized = quantity_scaler.transform(X_train["quantity"].values.reshape(-1, 1))
test_quantity_standardized = quantity_scaler.transform(X_test["quantity"].values.reshape(-1, 1))

# teacher_number_of_previously_posted_projects standardization
ppp_scaler = StandardScaler()
ppp_scaler.fit(X_train['teacher_number_of_previously_posted_projects'].values.reshape(-1, 1))
print(f"Mean : {ppp_scaler.mean_[0]}, Standard deviation : {np.sqrt(ppp_scaler.var_[0])}")
number_ppp_standardized = ppp_scaler.transform(X_train['teacher_number_of_previously_posted_projects'].values.reshape(-1, 1))
test_number_ppp_standardized = ppp_scaler.transform(X_test['teacher_number_of_previously_posted_projects'].values.reshape(-1, 1))
# using the text-cleaning function and stop-word list from the assignment
def decontracted(phrase):
    # specific contractions
    phrase = re.sub(r"won't", "will not", phrase)
    phrase = re.sub(r"can\'t", "can not", phrase)
    # general contractions
    phrase = re.sub(r"n\'t", " not", phrase)
    phrase = re.sub(r"\'re", " are", phrase)
    phrase = re.sub(r"\'s", " is", phrase)
    phrase = re.sub(r"\'d", " would", phrase)
    phrase = re.sub(r"\'ll", " will", phrase)
    phrase = re.sub(r"\'t", " not", phrase)
    phrase = re.sub(r"\'ve", " have", phrase)
    phrase = re.sub(r"\'m", " am", phrase)
    return phrase
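A quick check of the decontraction rules on a made-up phrase:
print(decontracted("I can't believe they won't help, it's sad"))
# expected: "I can not believe they will not help, it is sad"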
# we are removing the words from the stop words list: 'no', 'nor', 'not'
stopwords= ['i', 'me', 'my', 'myself', 'we', 'our', 'ours', 'ourselves', 'you', "you're", "you've",\
"you'll", "you'd", 'your', 'yours', 'yourself', 'yourselves', 'he', 'him', 'his', 'himself', \
'she', "she's", 'her', 'hers', 'herself', 'it', "it's", 'its', 'itself', 'they', 'them', 'their',\
'theirs', 'themselves', 'what', 'which', 'who', 'whom', 'this', 'that', "that'll", 'these', 'those', \
'am', 'is', 'are', 'was', 'were', 'be', 'been', 'being', 'have', 'has', 'had', 'having', 'do', 'does', \
'did', 'doing', 'a', 'an', 'the', 'and', 'but', 'if', 'or', 'because', 'as', 'until', 'while', 'of', \
'at', 'by', 'for', 'with', 'about', 'against', 'between', 'into', 'through', 'during', 'before', 'after',\
'above', 'below', 'to', 'from', 'up', 'down', 'in', 'out', 'on', 'off', 'over', 'under', 'again', 'further',\
'then', 'once', 'here', 'there', 'when', 'where', 'why', 'how', 'all', 'any', 'both', 'each', 'few', 'more',\
'most', 'other', 'some', 'such', 'only', 'own', 'same', 'so', 'than', 'too', 'very', \
's', 't', 'can', 'will', 'just', 'don', "don't", 'should', "should've", 'now', 'd', 'll', 'm', 'o', 're', \
've', 'y', 'ain', 'aren', "aren't", 'couldn', "couldn't", 'didn', "didn't", 'doesn', "doesn't", 'hadn',\
"hadn't", 'hasn', "hasn't", 'haven', "haven't", 'isn', "isn't", 'ma', 'mightn', "mightn't", 'mustn',\
"mustn't", 'needn', "needn't", 'shan', "shan't", 'shouldn', "shouldn't", 'wasn', "wasn't", 'weren', "weren't", \
'won', "won't", 'wouldn', "wouldn't"]
# text preprocessing for essays and titles (tqdm shows a progress bar)
def preprocess_text(texts):
    cleaned = []
    for sentence in tqdm(texts):
        sent = decontracted(sentence)
        sent = sent.replace('\\r', ' ')
        sent = sent.replace('\\"', ' ')
        sent = sent.replace('\\n', ' ')
        sent = re.sub('[^A-Za-z0-9]+', ' ', sent)
        # stop-word list from https://gist.github.com/sebleier/554280;
        # compare lowercased tokens so capitalized stop words are removed too
        sent = ' '.join(e for e in sent.split() if e.lower() not in stopwords)
        cleaned.append(sent.lower().strip())
    return cleaned

preprocessed_essays = preprocess_text(X_train['essay'].values)
test_preprocessed_essays = preprocess_text(X_test['essay'].values)
preprocessed_title = preprocess_text(X_train['project_title'].values)
test_preprocessed_title = preprocess_text(X_test['project_title'].values)
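A before/after look at one training essay (optional sanity check):
print(X_train['essay'].values[0][:150])
print(preprocessed_essays[0][:150])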
vectorizer = CountVectorizer(vocabulary=list(sorted_cat_dict.keys()), lowercase=False, binary=True)
# fitting on train data
vectorizer.fit(X_train['clean_categories'].values)
print(vectorizer.get_feature_names())
categories_feature = vectorizer.get_feature_names()
# for train data
categories_one_hot = vectorizer.transform(X_train['clean_categories'].values)
print("Shape of matrix after one hot encodig ",categories_one_hot.shape)
# for test data
test_categories_one_hot = vectorizer.transform(X_test['clean_categories'].values)
vectorizer = CountVectorizer(vocabulary=list(sorted_sub_cat_dict.keys()), lowercase=False, binary=True)
# fitting on train data
vectorizer.fit(X_train['clean_subcategories'].values)
print(vectorizer.get_feature_names())
subcategories_feature = vectorizer.get_feature_names()
# for train data
sub_categories_one_hot = vectorizer.transform(X_train['clean_subcategories'].values)
print("Shape of matrix after one hot encodig ",sub_categories_one_hot.shape)
# for test data
test_sub_categories_one_hot = vectorizer.transform(X_test['clean_subcategories'].values)
vectorizer = CountVectorizer(vocabulary=list(prefix_dict.keys()), lowercase=False, binary=True)
# fitting on train data
vectorizer.fit(X_train['clean_prefix'].values)
print(vectorizer.get_feature_names())
prefix_feature = vectorizer.get_feature_names()
# for train data
prefix_one_hot = vectorizer.transform(X_train['clean_prefix'].values)
print("Shape of matrix after one hot encodig ",prefix_one_hot.shape)
# for test data
test_prefix_one_hot = vectorizer.transform(X_test['clean_prefix'].values)
vectorizer = CountVectorizer(vocabulary=list(grade_dict.keys()), lowercase=False, binary=True)
# fitting on train data
vectorizer.fit(X_train['clean_grade'].values)
print(vectorizer.get_feature_names())
grade_feature = vectorizer.get_feature_names()
# for train data
grade_one_hot = vectorizer.transform(X_train['clean_grade'].values)
print("Shape of matrix after one hot encodig ",grade_one_hot.shape)
# for test data
test_grade_one_hot = vectorizer.transform(X_test['clean_grade'].values)
vectorizer = CountVectorizer(vocabulary=list(sorted_state_dict.keys()), lowercase=False, binary=True)
vectorizer.fit(X_train['clean_state'].values)
print(vectorizer.get_feature_names())
state_one_hot = vectorizer.transform(X_train['clean_state'].values)
state_feature = vectorizer.get_feature_names()
test_state_one_hot = vectorizer.transform(X_test['clean_state'].values)
vectorizer = CountVectorizer(min_df=10,ngram_range=(2,2),max_features=5000)
#fit using train data
vectorizer.fit(preprocessed_essays)
essay_feature = vectorizer.get_feature_names()
# for train data
text_bow = vectorizer.transform(preprocessed_essays)
print("Shape of train matrix : ",text_bow.shape)
# for test data
test_text_bow = vectorizer.transform(test_preprocessed_essays)
print("Shape of test matrix : ",test_text_bow.shape)
# for title
vectorizer.fit(preprocessed_title)
title_feature = vectorizer.get_feature_names()
# for train data
title_bow = vectorizer.transform(preprocessed_title)
print("Shape of train matrix : ",title_bow.shape)
# for test data
test_title_bow = vectorizer.transform(test_preprocessed_title)
print("Shape of test matrix : ",test_title_bow.shape)
vectorizer = TfidfVectorizer(min_df=10,ngram_range=(2,2),max_features=5000)
#fit using train data
vectorizer.fit(preprocessed_essays)
essay_feature_tfidf = vectorizer.get_feature_names()
# for train data
text_tfidf = vectorizer.transform(preprocessed_essays)
print("Shape of train matrix : ",text_tfidf.shape)
# for test data
test_text_tfidf = vectorizer.transform(test_preprocessed_essays)
print("Shape of test matrix : ",test_text_tfidf.shape)
# for title
vectorizer.fit(preprocessed_title)
title_feature_tfidf = vectorizer.get_feature_names()
# for train data
title_tfidf = vectorizer.transform(preprocessed_title)
print("Shape of train matrix : ",title_tfidf.shape)
# for test data
test_title_tfidf = vectorizer.transform(test_preprocessed_title)
print("Shape of test matrix : ",test_title_tfidf.shape)
with open('glove_vectors', 'rb') as f:
    model = pickle.load(f)
glove_words = set(model.keys())
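Sanity check on the loaded GloVe pickle: it should map each word to a 300-dimensional vector (this assumes the common word 'the' is in the vocabulary):
print(len(glove_words))    # vocabulary size
print(len(model['the']))   # should be 300, the vector dimensionality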
# average Word2Vec: represent each text as the mean of the GloVe vectors of its words
def avg_w2v(sentences):
    vectors = []                # the avg-w2v for each sentence is stored in this list
    for sentence in tqdm(sentences):
        vector = np.zeros(300)  # GloVe vectors are 300-dimensional
        cnt_words = 0           # number of words with a valid vector in the sentence
        for word in sentence.split():
            if word in glove_words:
                vector += model[word]
                cnt_words += 1
        if cnt_words != 0:
            vector /= cnt_words
        vectors.append(vector)
    return vectors

avg_w2v_vectors = avg_w2v(preprocessed_essays)                  # train essays
test_avg_w2v_vectors = avg_w2v(test_preprocessed_essays)        # test essays
title_avg_w2v_vectors = avg_w2v(preprocessed_title)             # train titles
test_title_avg_w2v_vectors = avg_w2v(test_preprocessed_title)   # test titles
print(len(avg_w2v_vectors), len(avg_w2v_vectors[0]))
# TF-IDF weighted Word2Vec: each text is the tf-idf-weighted average of its word vectors.
# The TfidfVectorizer is always fitted on the TRAIN texts, then used for both train and test.
def tfidf_w2v(train_texts, texts):
    tfidf_model = TfidfVectorizer()
    tfidf_model.fit(train_texts)
    # dictionary with word as key and its idf as value
    dictionary = dict(zip(tfidf_model.get_feature_names(), list(tfidf_model.idf_)))
    tfidf_words = set(tfidf_model.get_feature_names())
    vectors = []  # the tf-idf weighted w2v for each sentence is stored in this list
    for sentence in tqdm(texts):
        vector = np.zeros(300)   # GloVe vectors are 300-dimensional
        tf_idf_weight = 0        # sum of the tf-idf weights of the words in this sentence
        for word in sentence.split():
            if (word in glove_words) and (word in tfidf_words):
                vec = model[word]
                # tf-idf = idf (dictionary[word]) * tf (count of the word / number of words)
                tf_idf = dictionary[word] * (sentence.count(word) / len(sentence.split()))
                vector += (vec * tf_idf)
                tf_idf_weight += tf_idf
        if tf_idf_weight != 0:
            vector /= tf_idf_weight
        vectors.append(vector)
    return vectors

tfidf_w2v_vectors = tfidf_w2v(preprocessed_essays, preprocessed_essays)                 # train essays
test_tfidf_w2v_vectors = tfidf_w2v(preprocessed_essays, test_preprocessed_essays)       # test essays
title_tfidf_w2v_vectors = tfidf_w2v(preprocessed_title, preprocessed_title)             # train titles
test_title_tfidf_w2v_vectors = tfidf_w2v(preprocessed_title, test_preprocessed_title)   # test titles
print(len(tfidf_w2v_vectors), len(tfidf_w2v_vectors[0]))
Printing all feature shapes
print("*"*70)
print("Categorical Features that are considered :- ")
print("Subject Categories :- ",categories_one_hot.shape)
print("Subject Sub-Categories :- ",sub_categories_one_hot.shape)
print("Sudent Grade :- ",grade_one_hot.shape)
print("School State :- ",state_one_hot.shape)
print("Teacher Prefix :- ",prefix_one_hot.shape)
print("*"*70)
print("Text Features that are considered :- ")
print("*"*70)
print("Project Essay BOW:- ",text_bow.shape)
print("Project Essay TFIDF:- ",text_tfidf.shape)
print("*"*70)
print("Project Title BOW:- ",title_bow.shape)
print("Project Title TFIDF:- ",title_tfidf.shape)
print("*"*70)
# combining all features into one matrix per set
from scipy.sparse import hstack
set1 = hstack((categories_one_hot,sub_categories_one_hot,prefix_one_hot,grade_one_hot,state_one_hot,price_standardized,quantity_standardized,number_ppp_standardized,text_bow,title_bow))
set1_t = hstack((test_categories_one_hot,test_sub_categories_one_hot,test_prefix_one_hot,test_grade_one_hot,test_state_one_hot,test_price_standardized,test_quantity_standardized,test_number_ppp_standardized,test_text_bow,test_title_bow))
set2 = hstack((categories_one_hot,sub_categories_one_hot,prefix_one_hot,state_one_hot,grade_one_hot,text_tfidf,title_tfidf,price_standardized,quantity_standardized,number_ppp_standardized))
set2_t = hstack((test_categories_one_hot,test_sub_categories_one_hot,test_prefix_one_hot,test_state_one_hot,test_grade_one_hot,test_text_tfidf,test_title_tfidf,test_price_standardized,test_quantity_standardized,test_number_ppp_standardized))
set3 = hstack((categories_one_hot,sub_categories_one_hot,prefix_one_hot,state_one_hot,grade_one_hot,price_standardized,quantity_standardized,number_ppp_standardized,avg_w2v_vectors,title_avg_w2v_vectors))
set3_t = hstack((test_categories_one_hot,test_sub_categories_one_hot,test_prefix_one_hot,test_state_one_hot,test_grade_one_hot,test_price_standardized,test_quantity_standardized,test_number_ppp_standardized,test_avg_w2v_vectors,test_title_avg_w2v_vectors))
set4 = hstack((categories_one_hot,sub_categories_one_hot,prefix_one_hot,state_one_hot,grade_one_hot,price_standardized,quantity_standardized,number_ppp_standardized,tfidf_w2v_vectors,title_tfidf_w2v_vectors))
set4_t = hstack((test_categories_one_hot,test_sub_categories_one_hot,test_prefix_one_hot,test_state_one_hot,test_grade_one_hot,test_price_standardized,test_quantity_standardized,test_number_ppp_standardized,test_tfidf_w2v_vectors,test_title_tfidf_w2v_vectors))
print(set1.shape,"\t",set1_t.shape)
print(set2.shape,"\t",set2_t.shape)
print(set3.shape,"\t",set3_t.shape)
print(set4.shape,"\t",set4_t.shape)
# feature-name list in the same column order as set2:
# categories, subcategories, prefix, state, grade, essay tf-idf, title tf-idf, then the numeric columns
set_feature = categories_feature + subcategories_feature + prefix_feature + state_feature + grade_feature + essay_feature_tfidf + title_feature_tfidf
set_feature.append("price")
set_feature.append("quantity")
set_feature.append("teacher_number_of_previously_posted_projects")
print(len(set_feature))
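A quick sanity check (optional): the name list should line up with the number of columns in set2.
print(len(set_feature), set2.shape[1])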
param_grid = dict(max_depth = [1, 5, 10, 50, 100],min_samples_split = [5, 10, 100, 500])
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import export_graphviz
from sklearn.model_selection import GridSearchCV
from wordcloud import WordCloud, STOPWORDS
def Plot_wordcloud(words):
    """Plot a word cloud of the given text."""
    wordcloud = WordCloud(width=800, height=800, background_color='white',
                          stopwords=stopwords, min_font_size=10).generate(words)
    # plot the WordCloud image
    plt.figure(figsize=(8, 8), facecolor=None)
    plt.imshow(wordcloud)
    plt.axis("off")
    plt.tight_layout(pad=0)
    plt.title("Word Cloud Plot")
    plt.show()
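A quick smoke test of the helper on a tiny made-up string:
Plot_wordcloud("classroom books reading students classroom books")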
dt = DecisionTreeClassifier(class_weight="balanced")
# return_train_score=True so cv_results_ includes mean_train_score (newer sklearn versions default it to False)
grid = GridSearchCV(dt, param_grid, scoring="roc_auc", n_jobs=-1, cv=5, return_train_score=True)
grid.fit(set1, y_train)
print(grid.best_estimator_)
print(grid.best_index_)
print(grid.best_params_)
print(grid.best_score_)
# converting CV results to a dataframe
df = pd.DataFrame(data=grid.cv_results_)
test_score = df["mean_test_score"].values
train_score = df["mean_train_score"].values
print(train_score)
print(test_score)
max_depth = [1, 5, 10, 50, 100]
plt.figure(figsize=(10, 20))
for i, ms in enumerate(param_grid["min_samples_split"]):
    # cv_results_ varies min_samples_split fastest, so rows i, i+4, ..., i+16
    # share the same min_samples_split while max_depth varies
    temp = [x for x in range(i, 16 + i + 1, 4)]
    train = [train_score[x] for x in temp]
    test = [test_score[x] for x in temp]
    plt.subplot(4, 1, i + 1)
    plt.title("min_samples_split: " + str(ms))
    plt.plot(max_depth, train)
    plt.plot(max_depth, test)
    plt.legend(["train", "test"])
    plt.xlabel("max_depth")
    plt.ylabel("Mean AUC")
plt.show()
y1_predict = grid.predict(set1_t)
cm1 = confusion_matrix(y_test,y1_predict)
# https://seaborn.pydata.org/generated/seaborn.heatmap.html
sns.heatmap(cm1, annot=True, fmt="d")
plt.ylabel("Actual Class")
plt.xlabel("Predicted Class")
plt.title("Confusion Matrix")
Word Cloud plot
# filter out the false-positive data points (predicted approved but actually rejected)
fp_mask = (y1_predict != y_test) & (y_test == 0)
dump = pd.DataFrame(data=fp_mask)
# keep only the rows where the mask is True
a = dump.loc[dump["project_is_approved"] == True]
# indices of the false-positive data points
ilc = a.index.values.tolist()
dummy = []
for index in ilc:
    dummy.append(X_test.loc[index])
# false-positive data point dataframe
false_positive = pd.DataFrame(data=dummy)
# combine all text features into one column
false_positive["text_data"] = false_positive["essay"] + " " + false_positive["project_title"]
words = " "
for ew in false_positive["text_data"]:
    ew = ew.lower()
    for w in ew.split():
        words += w + " "   # keep a space between tokens so the word cloud sees separate words
# plotting word cloud
Plot_wordcloud(words)
Box plot of price for false datapoints
false_positive.boxplot(column = ["price"])
plt.ylim((0,1000))
plt.ylabel("Precentiles")
plt.title("Box plot of price for false datapoints")
pdf for teacher_number_of_previously_posted_projects
false_positive["teacher_number_of_previously_posted_projects"].plot.kde()
plt.xlabel("Number of previously posted projects")
plt.title("pdf for teacher_number_of_previously_posted_projects")
AUC plotting
# predicted probabilities for the positive class
y1_predict_prob = grid.predict_proba(set1_t)[:,1]
y1_predict_prob_train = grid.predict_proba(set1)[:,1]
# reference: https://stackoverflow.com/questions/25009284/how-to-plot-roc-curve-in-python
fpr,tpr,thre = roc_curve(y_test,y1_predict_prob)
fpr_train,tpr_train,thre_train = roc_curve(y_train,y1_predict_prob_train)
# AUC for test and train data
roc_auc1 = metrics.auc(fpr,tpr)
roc_auc_train1 = metrics.auc(fpr_train,tpr_train)
plt.plot(fpr,tpr,"b--",label = 'AUC test = %0.2f'%roc_auc1)
plt.plot(fpr_train,tpr_train,"y--",label = 'AUC train = %0.2f'%roc_auc_train1)
plt.title("ROC curve")
plt.xlabel("False positive rate")
plt.ylabel("True positive rate")
plt.plot([0, 1], [0, 1],'r--')
plt.xlim([0,1])
plt.ylim([0,1])
plt.legend(loc = "lower right")
plt.show()
# fit a shallow tree and export it in Graphviz DOT format for visualization
dt = DecisionTreeClassifier(max_depth=3, min_samples_split=500)
dt.fit(set1, y_train)
with open("donor_classifier_set1.txt", "w") as f:
    export_graphviz(dt, out_file=f)
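The exported file is Graphviz DOT text. If the python-graphviz package is installed (an assumption, it is not imported above), it can be rendered to an image:
import graphviz
graphviz.Source(open("donor_classifier_set1.txt").read()).render("dt_set1", format="png")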
grid.fit(set2,y_train)
print(grid.best_estimator_)
print(grid.best_index_)
print(grid.best_params_)
print(grid.best_score_)
# converting CV results to a dataframe
df = pd.DataFrame(data=grid.cv_results_)
test_score = df["mean_test_score"].values
train_score = df["mean_train_score"].values
print(train_score)
print(test_score)
max_depth = [1, 5, 10, 50, 100]
plt.figure(figsize=(10, 20))
for i, ms in enumerate(param_grid["min_samples_split"]):
    # cv_results_ varies min_samples_split fastest, so rows i, i+4, ..., i+16
    # share the same min_samples_split while max_depth varies
    temp = [x for x in range(i, 16 + i + 1, 4)]
    train = [train_score[x] for x in temp]
    test = [test_score[x] for x in temp]
    plt.subplot(4, 1, i + 1)
    plt.title("min_samples_split: " + str(ms))
    plt.plot(max_depth, train)
    plt.plot(max_depth, test)
    plt.legend(["train", "test"])
    plt.xlabel("max_depth")
    plt.ylabel("Mean AUC")
plt.show()
Confusion matrix
y2_predict = grid.predict(set2_t)
cm2 = confusion_matrix(y_test,y2_predict)
# https://seaborn.pydata.org/generated/seaborn.heatmap.html
sns.heatmap(cm2, annot=True, fmt="d")
plt.ylabel("Actual Class")
plt.xlabel("Predicted Class")
plt.title("Confusion Matrix")
Word Cloud
# filter out the false-positive data points (predicted approved but actually rejected)
fp_mask = (y2_predict != y_test) & (y_test == 0)
dump = pd.DataFrame(data=fp_mask)
# keep only the rows where the mask is True
a = dump.loc[dump["project_is_approved"] == True]
# indices of the false-positive data points
ilc = a.index.values.tolist()
dummy = []
for index in ilc:
    dummy.append(X_test.loc[index])
# false-positive data point dataframe
false_positive = pd.DataFrame(data=dummy)
# combine all text features into one column
false_positive["text_data"] = false_positive["essay"] + " " + false_positive["project_title"]
words = " "
for ew in false_positive["text_data"]:
    ew = ew.lower()
    for w in ew.split():
        words += w + " "   # keep a space between tokens so the word cloud sees separate words
# plotting word cloud
Plot_wordcloud(words)
Box plot of price for false datapoints
false_positive.boxplot(column = ["price"])
plt.ylim((0,1000))
plt.ylabel("Precentiles")
plt.title("Box plot of price for false datapoints")
pdf for teacher_number_of_previously_posted_projects
false_positive["teacher_number_of_previously_posted_projects"].plot.kde()
plt.xlabel("Number of previously posted projects")
plt.title("pdf for teacher_number_of_previously_posted_projects")
AUC
# predicted probabilities for the positive class
y2_predict_prob = grid.predict_proba(set2_t)[:,1]
y2_predict_prob_train = grid.predict_proba(set2)[:,1]
# reference: https://stackoverflow.com/questions/25009284/how-to-plot-roc-curve-in-python
fpr,tpr,thre = roc_curve(y_test,y2_predict_prob)
fpr_train,tpr_train,thre_train = roc_curve(y_train,y2_predict_prob_train)
# AUC for test and train data
roc_auc2 = metrics.auc(fpr,tpr)
roc_auc_train2 = metrics.auc(fpr_train,tpr_train)
plt.plot(fpr,tpr,"b--",label = 'AUC test = %0.2f'%roc_auc2)
plt.plot(fpr_train,tpr_train,"y--",label = 'AUC train = %0.2f'%roc_auc_train2)
plt.title("ROC curve")
plt.xlabel("False positive rate")
plt.ylabel("True positive rate")
plt.plot([0, 1], [0, 1],'r--')
plt.xlim([0,1])
plt.ylim([0,1])
plt.legend(loc = "lower right")
plt.show()
# fit a shallow tree and export it in Graphviz DOT format for visualization
dt = DecisionTreeClassifier(max_depth=3, min_samples_split=500)
dt.fit(set2, y_train)
with open("donor_classifier_set2.txt", "w") as f:
    export_graphviz(dt, out_file=f)
grid.fit(set3,y_train)
print(grid.best_estimator_)
print(grid.best_index_)
print(grid.best_params_)
print(grid.best_score_)
# converting CV results to a dataframe
df = pd.DataFrame(data=grid.cv_results_)
test_score = df["mean_test_score"].values
train_score = df["mean_train_score"].values
print(train_score)
print(test_score)
Scores plotting for train/validation
max_depth = [1, 5, 10, 50, 100]
plt.figure(figsize=(10, 20))
for i, ms in enumerate(param_grid["min_samples_split"]):
    # cv_results_ varies min_samples_split fastest, so rows i, i+4, ..., i+16
    # share the same min_samples_split while max_depth varies
    temp = [x for x in range(i, 16 + i + 1, 4)]
    train = [train_score[x] for x in temp]
    test = [test_score[x] for x in temp]
    plt.subplot(4, 1, i + 1)
    plt.title("min_samples_split: " + str(ms))
    plt.plot(max_depth, train)
    plt.plot(max_depth, test)
    plt.legend(["train", "test"])
    plt.xlabel("max_depth")
    plt.ylabel("Mean AUC")
plt.show()
Confusion matrix
y3_predict = grid.predict(set3_t)
cm3 = confusion_matrix(y_test,y3_predict)
# https://seaborn.pydata.org/generated/seaborn.heatmap.html
sns.heatmap(cm3, annot=True, fmt="d")
plt.ylabel("Actual Class")
plt.xlabel("Predicted Class")
plt.title("Confusion Matrix")
Word Cloud
# filter out the false-positive data points (predicted approved but actually rejected)
fp_mask = (y3_predict != y_test) & (y_test == 0)
dump = pd.DataFrame(data=fp_mask)
# keep only the rows where the mask is True
a = dump.loc[dump["project_is_approved"] == True]
# indices of the false-positive data points
ilc = a.index.values.tolist()
dummy = []
for index in ilc:
    dummy.append(X_test.loc[index])
# false-positive data point dataframe
false_positive = pd.DataFrame(data=dummy)
# combine all text features into one column
false_positive["text_data"] = false_positive["essay"] + " " + false_positive["project_title"]
words = " "
for ew in false_positive["text_data"]:
    ew = ew.lower()
    for w in ew.split():
        words += w + " "   # keep a space between tokens so the word cloud sees separate words
# plotting word cloud
Plot_wordcloud(words)
Boxplot
false_positive.boxplot(column = ["price"])
plt.ylim((0,1000))
plt.ylabel("Precentiles")
plt.title("Box plot of price for false datapoints")
KDE
false_positive["teacher_number_of_previously_posted_projects"].plot.kde()
plt.xlabel("Number of previously posted projects")
plt.title("pdf for teacher_number_of_previously_posted_projects")
AUC curve
# predicted probabilities for the positive class
y3_predict_prob = grid.predict_proba(set3_t)[:,1]
y3_predict_prob_train = grid.predict_proba(set3)[:,1]
# reference: https://stackoverflow.com/questions/25009284/how-to-plot-roc-curve-in-python
fpr,tpr,thre = roc_curve(y_test,y3_predict_prob)
fpr_train,tpr_train,thre_train = roc_curve(y_train,y3_predict_prob_train)
# AUC for test and train data
roc_auc3 = metrics.auc(fpr,tpr)
roc_auc_train3 = metrics.auc(fpr_train,tpr_train)
plt.plot(fpr,tpr,"b--",label = 'AUC test = %0.2f'%roc_auc3)
plt.plot(fpr_train,tpr_train,"y--",label = 'AUC train = %0.2f'%roc_auc_train3)
plt.title("ROC curve")
plt.xlabel("False positive rate")
plt.ylabel("True positive rate")
plt.plot([0, 1], [0, 1],'r--')
plt.xlim([0,1])
plt.ylim([0,1])
plt.legend(loc = "lower right")
plt.show()
grid.fit(set4,y_train)
# converting CV results to a dataframe
df = pd.DataFrame(data=grid.cv_results_)
test_score = df["mean_test_score"].values
train_score = df["mean_train_score"].values
print(train_score)
print(test_score)
max_depth = [1, 5, 10, 50, 100]
plt.figure(figsize=(10, 20))
for i, ms in enumerate(param_grid["min_samples_split"]):
    # cv_results_ varies min_samples_split fastest, so rows i, i+4, ..., i+16
    # share the same min_samples_split while max_depth varies
    temp = [x for x in range(i, 16 + i + 1, 4)]
    train = [train_score[x] for x in temp]
    test = [test_score[x] for x in temp]
    plt.subplot(4, 1, i + 1)
    plt.title("min_samples_split: " + str(ms))
    plt.plot(max_depth, train)
    plt.plot(max_depth, test)
    plt.legend(["train", "test"])
    plt.xlabel("max_depth")
    plt.ylabel("Mean AUC")
plt.show()
Confusion Matrix
y4_predict = grid.predict(set4_t)
cm4 = confusion_matrix(y_test,y4_predict)
# https://seaborn.pydata.org/generated/seaborn.heatmap.html
sns.heatmap(cm4, annot=True, fmt="d")
plt.ylabel("Actual Class")
plt.xlabel("Predicted Class")
plt.title("Confusion Matrix")
Word Cloud
# filter out the false-positive data points (predicted approved but actually rejected)
fp_mask = (y4_predict != y_test) & (y_test == 0)
dump = pd.DataFrame(data=fp_mask)
# keep only the rows where the mask is True
a = dump.loc[dump["project_is_approved"] == True]
# indices of the false-positive data points
ilc = a.index.values.tolist()
dummy = []
for index in ilc:
    dummy.append(X_test.loc[index])
# false-positive data point dataframe
false_positive = pd.DataFrame(data=dummy)
# combine all text features into one column
false_positive["text_data"] = false_positive["essay"] + " " + false_positive["project_title"]
words = " "
for ew in false_positive["text_data"]:
    ew = ew.lower()
    for w in ew.split():
        words += w + " "   # keep a space between tokens so the word cloud sees separate words
# plotting word cloud
Plot_wordcloud(words)
Boxplot
false_positive.boxplot(column = ["price"])
plt.ylim((0,1000))
plt.ylabel("Precentiles")
plt.title("Box plot of price for false datapoints")
KDE
false_positive["teacher_number_of_previously_posted_projects"].plot.kde()
plt.xlabel("Number of previously posted projects")
plt.title("pdf for teacher_number_of_previously_posted_projects")
AUC Curve
# predicted probabilities for the positive class
y4_predict_prob = grid.predict_proba(set4_t)[:,1]
y4_predict_prob_train = grid.predict_proba(set4)[:,1]
# reference: https://stackoverflow.com/questions/25009284/how-to-plot-roc-curve-in-python
fpr,tpr,thre = roc_curve(y_test,y4_predict_prob)
fpr_train,tpr_train,thre_train = roc_curve(y_train,y4_predict_prob_train)
# AUC for test and train data
roc_auc4 = metrics.auc(fpr,tpr)
roc_auc_train4 = metrics.auc(fpr_train,tpr_train)
plt.plot(fpr,tpr,"b--",label = 'AUC test = %0.2f'%roc_auc4)
plt.plot(fpr_train,tpr_train,"y--",label = 'AUC train = %0.2f'%roc_auc_train4)
plt.title("ROC curve")
plt.xlabel("False positive rate")
plt.ylabel("True positive rate")
plt.plot([0, 1], [0, 1],'r--')
plt.xlim([0,1])
plt.ylim([0,1])
plt.legend(loc = "lower right")
plt.show()
dt =DecisionTreeClassifier(max_depth = 10, min_samples_split= 500)
dt.fit(set2,y_train)
top_feature = dt.feature_importances_
df_feature = pd.DataFrame(data=set_feature,columns=["Feature Name"])
df_feature["Feature Value"] = top_feature
print(len(set_feature),len(top_feature))
df_feature = df_feature.sort_values(by = ["Feature Value"],ascending=False)
df_feature.head()
# selecting top 5k features
df_feature = df_feature[:5000]
# getting the list of indices of top features
index_list = df_feature.index.tolist()
# converting to csr to access elements
s = set2.tocsr()
s_t = set2_t.tocsr()
print(s.shape)
print(s_t.shape)
# Extracting specific columns in numpy array
# https://stackoverflow.com/questions/8386675/extracting-specific-columns-in-numpy-array
s = s[:,index_list]
s_t = s_t[:,index_list]
print(s.shape)
print(s_t.shape)
grid.fit(s,y_train)
# converting CV results to a dataframe
df = pd.DataFrame(data=grid.cv_results_)
test_score = df["mean_test_score"].values
train_score = df["mean_train_score"].values
print(train_score)
print(test_score)
max_depth = [1, 5, 10, 50, 100]
plt.figure(figsize=(10, 20))
for i, ms in enumerate(param_grid["min_samples_split"]):
    # cv_results_ varies min_samples_split fastest, so rows i, i+4, ..., i+16
    # share the same min_samples_split while max_depth varies
    temp = [x for x in range(i, 16 + i + 1, 4)]
    train = [train_score[x] for x in temp]
    test = [test_score[x] for x in temp]
    plt.subplot(4, 1, i + 1)
    plt.title("min_samples_split: " + str(ms))
    plt.plot(max_depth, train)
    plt.plot(max_depth, test)
    plt.legend(["train", "test"])
    plt.xlabel("max_depth")
    plt.ylabel("Mean AUC")
plt.show()
Confusion Matrix
y5_predict = grid.predict(s_t)
cm5 = confusion_matrix(y_test,y5_predict)
# https://seaborn.pydata.org/generated/seaborn.heatmap.html
sns.heatmap(cm5, annot=True, fmt="d")
plt.ylabel("Actual Class")
plt.xlabel("Predicted Class")
plt.title("Confusion Matrix")
Word Cloud
# filter out the false-positive data points (predicted approved but actually rejected)
fp_mask = (y5_predict != y_test) & (y_test == 0)
dump = pd.DataFrame(data=fp_mask)
# keep only the rows where the mask is True
a = dump.loc[dump["project_is_approved"] == True]
# indices of the false-positive data points
ilc = a.index.values.tolist()
dummy = []
for index in ilc:
    dummy.append(X_test.loc[index])
# false-positive data point dataframe
false_positive = pd.DataFrame(data=dummy)
# combine all text features into one column
false_positive["text_data"] = false_positive["essay"] + " " + false_positive["project_title"]
words = " "
for ew in false_positive["text_data"]:
    ew = ew.lower()
    for w in ew.split():
        words += w + " "   # keep a space between tokens so the word cloud sees separate words
# plotting word cloud
Plot_wordcloud(words)
Box Plot
false_positive.boxplot(column = ["price"])
plt.ylim((0,1000))
plt.ylabel("Precentiles")
plt.title("Box plot of price for false datapoints")
KDE
false_positive["teacher_number_of_previously_posted_projects"].plot.kde()
plt.xlabel("Number of previously posted projects")
plt.title("pdf for teacher_number_of_previously_posted_projects")
AUC CURVE
# predicted probabilities for the positive class
y5_predict_prob = grid.predict_proba(s_t)[:,1]
y5_predict_prob_train = grid.predict_proba(s)[:,1]
# reference: https://stackoverflow.com/questions/25009284/how-to-plot-roc-curve-in-python
fpr,tpr,thre = roc_curve(y_test,y5_predict_prob)
fpr_train,tpr_train,thre_train = roc_curve(y_train,y5_predict_prob_train)
# AUC for test and train data
roc_auc5 = metrics.auc(fpr,tpr)
roc_auc_train5 = metrics.auc(fpr_train,tpr_train)
plt.plot(fpr,tpr,"b--",label = 'AUC test = %0.2f'%roc_auc5)
plt.plot(fpr_train,tpr_train,"y--",label = 'AUC train = %0.2f'%roc_auc_train5)
plt.title("ROC curve")
plt.xlabel("False positive rate")
plt.ylabel("True positive rate")
plt.plot([0, 1], [0, 1],'r--')
plt.xlim([0,1])
plt.ylim([0,1])
plt.legend(loc = "lower right")
plt.show()
from prettytable import PrettyTable
summary = PrettyTable()
summary.field_names = ["Set", "Vectorizer", "Model", "Hyperparameter", "Test AUC", "Train AUC"]
summary.add_row(["set1", "BOW", "DT", "Depth = 10, min_samples = 500", "%0.3f" % roc_auc1, "%0.3f" % roc_auc_train1])
summary.add_row(["set2", "TFIDF", "DT", "Depth = 10, min_samples = 500", "%0.3f" % roc_auc2, "%0.3f" % roc_auc_train2])
summary.add_row(["set3", "Avg-W2V", "DT", "Depth = 10, min_samples = 500", "%0.3f" % roc_auc3, "%0.3f" % roc_auc_train3])
summary.add_row(["set4", "TFIDF-W2V", "DT", "Depth = 10, min_samples = 500", "%0.3f" % roc_auc4, "%0.3f" % roc_auc_train4])
summary.add_row(["set2 top 5k", "TFIDF top 5k", "DT", "Depth = 10, min_samples = 500", "%0.3f" % roc_auc5, "%0.3f" % roc_auc_train5])
print(summary)